if ( !l2_pgentry_empty(*l2e) ) continue;
page = (unsigned long)get_free_page(GFP_KERNEL);
clear_page(page);
- *l2e = mk_l2_pgentry(__pa(page) | PAGE_HYPERVISOR);
+ *l2e = mk_l2_pgentry(__pa(page) | __PAGE_HYPERVISOR);
vaddr += 1 << L2_PAGETABLE_SHIFT;
}
}
ioremap_pt = (void *)get_free_page(GFP_KERNEL);
clear_page(ioremap_pt);
idle0_pg_table[IOREMAP_VIRT_START >> L2_PAGETABLE_SHIFT] =
- mk_l2_pgentry(__pa(ioremap_pt) | PAGE_HYPERVISOR);
+ mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR);
/* Create read-only mapping of MPT for guest-OS use. */
idle0_pg_table[READONLY_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
pl2e = idle_pg_table[nr] + (MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT);
mapcache[nr] = (unsigned long *)get_free_page(GFP_KERNEL);
clear_page(mapcache[nr]);
- *pl2e = mk_l2_pgentry(__pa(mapcache[nr]) | PAGE_HYPERVISOR);
+ *pl2e = mk_l2_pgentry(__pa(mapcache[nr]) | __PAGE_HYPERVISOR);
+
+ /* Set up linear page table mapping. */
+ idle_pg_table[nr][LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
+ mk_l2_pgentry(__pa(idle_pg_table[nr]) | __PAGE_HYPERVISOR);
init_idle_task();
}
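The new LINEAR_PT_VIRT_START slot points the page directory back at itself, so the L1 page tables become readable through a fixed 4MB virtual window: the PTE mapping a virtual address va ends up at LINEAR_PT_VIRT_START + (va >> PAGE_SHIFT) * sizeof(l1_pgentry_t). A minimal sketch of that lookup, assuming the usual two-level 4KB-page i386 layout and the constants from this changeset's config.h/page.h; linear_pte_of() is a hypothetical helper, not something this patch adds:

/* Illustration only -- not part of this changeset. */
static inline l1_pgentry_t *linear_pte_of(unsigned long va)
{
    /* One l1_pgentry_t per page of virtual address space, starting at
     * the base of the linear p.t. window. */
    return (l1_pgentry_t *)LINEAR_PT_VIRT_START + (va >> PAGE_SHIFT);
}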
* sizeof(l2_pgentry_t));
l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
mk_l2_pgentry(__pa(p->mm.perdomain_pt) | __PAGE_HYPERVISOR);
+ l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
+ mk_l2_pgentry(phys_l2tab | __PAGE_HYPERVISOR);
p->mm.pagetable = mk_pagetable(phys_l2tab);
unmap_domain_mem(l2tab);
memcpy(l2tab, idle_pg_table[p->processor], PAGE_SIZE);
l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
mk_l2_pgentry(__pa(p->mm.perdomain_pt) | __PAGE_HYPERVISOR);
+ l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
+ mk_l2_pgentry(phys_l2tab | __PAGE_HYPERVISOR);
memset(l2tab, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE*sizeof(l2_pgentry_t));
p->mm.pagetable = mk_pagetable(phys_l2tab);
p_l2_entry[(PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT) -
DOMAIN_ENTRIES_PER_L2_PAGETABLE] =
mk_l2_pgentry(__pa(current->mm.perdomain_pt) | __PAGE_HYPERVISOR);
+ p_l2_entry[(LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT) -
+ DOMAIN_ENTRIES_PER_L2_PAGETABLE] =
+ mk_l2_pgentry((page_nr << PAGE_SHIFT) | __PAGE_HYPERVISOR);
out:
unmap_domain_mem(p_l2_entry);
case PGREQ_NORMAL:
page = frame_table + pfn;
flags = page->flags;
-
+
if ( DOMAIN_OKAY(flags) )
{
switch ( (flags & PG_type_mask) )
#include <asm/atomic.h>
atomic_t tlb_flush_count[NR_CPUS];
-#define __read_cr3(__var) \
- do { \
- __asm__ __volatile ( \
- "movl %%cr3, %0;" \
- : "=r" (__var)); \
- } while (0)
#define __write_cr3_counted(__pa) \
do { \
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
-#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
+#define current_text_addr() \
+ ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
/*
* CPU type and hardware bug flags. Kept separately for each CPU.
return edx;
}
+
+/*
+ * Intel CPU flags in CR0
+ */
+#define X86_CR0_PE 0x00000001 /* Enable Protected Mode (RW) */
+#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor (RW) */
+#define X86_CR0_EM 0x00000004 /* Require FPU Emulation (RO) */
+#define X86_CR0_TS 0x00000008 /* Task Switched (RW) */
+#define X86_CR0_NE 0x00000020 /* Numeric Error Reporting (RW) */
+#define X86_CR0_WP 0x00010000 /* Supervisor Write Protect (RW) */
+#define X86_CR0_AM 0x00040000 /* Alignment Checking (RW) */
+#define X86_CR0_NW 0x20000000 /* Not Write-Through (RW) */
+#define X86_CR0_CD 0x40000000 /* Cache Disable (RW) */
+#define X86_CR0_PG 0x80000000 /* Paging (RW) */
+
+#define read_cr0() ({ \
+ unsigned int __dummy; \
+ __asm__( \
+ "movl %%cr0,%0\n\t" \
+ :"=r" (__dummy)); \
+ __dummy; \
+})
+
+#define write_cr0(x) \
+ __asm__("movl %0,%%cr0": :"r" (x));
+
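Naming the CR0 bits lets stts() further down read write_cr0(X86_CR0_TS|read_cr0()) instead of the old magic constant 8, and gives read_cr0()/write_cr0() readable call sites. A small usage sketch, illustrative only and not part of this patch (both helpers are hypothetical):

/* Illustration only: flip CR0 bits by name rather than by magic number. */
static inline void fpu_lazy_disable(void)
{
    write_cr0(read_cr0() | X86_CR0_TS);  /* next FPU insn raises #NM */
}

static inline void supervisor_wp_enable(void)
{
    write_cr0(read_cr0() | X86_CR0_WP);  /* ring 0 honours read-only PTEs */
}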
/*
* Intel CPU features in CR4
*/
#include <xeno/config.h>
#include <asm/bitops.h>
-struct task_struct;
-extern void switch_to(struct task_struct *prev,
- struct task_struct *next);
-
/* Clear and set 'TS' bit respectively */
#define clts() __asm__ __volatile__ ("clts")
-#define read_cr0() ({ \
- unsigned int __dummy; \
- __asm__( \
- "movl %%cr0,%0\n\t" \
- :"=r" (__dummy)); \
- __dummy; \
-})
-#define write_cr0(x) \
- __asm__("movl %0,%%cr0": :"r" (x));
-
-#define read_cr4() ({ \
- unsigned int __dummy; \
- __asm__( \
- "movl %%cr4,%0\n\t" \
- :"=r" (__dummy)); \
- __dummy; \
-})
-#define write_cr4(x) \
- __asm__("movl %0,%%cr4": :"r" (x));
-#define stts() write_cr0(8 | read_cr0())
+#define stts() write_cr0(X86_CR0_TS|read_cr0())
#define wbinvd() \
__asm__ __volatile__ ("wbinvd": : :"memory");
#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr),1))
-
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
-/*
- * The semantics of XCHGCMP8B are a bit strange, this is why
- * there is a loop and the loading of %%eax and %%edx has to
- * be inside. This inlines well in most cases, the cached
- * cost is around ~38 cycles. (in the future we might want
- * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
- * might have an implicit FPU-save as a cost, so it's not
- * clear which path to go.)
- */
-static inline void __set_64bit (unsigned long long * ptr,
- unsigned int low, unsigned int high)
-{
- __asm__ __volatile__ (
- "\n1:\t"
- "movl (%0), %%eax\n\t"
- "movl 4(%0), %%edx\n\t"
- "cmpxchg8b (%0)\n\t"
- "jnz 1b"
- : /* no outputs */
- : "D"(ptr),
- "b"(low),
- "c"(high)
- : "ax","dx","memory");
-}
-
-static inline void __set_64bit_constant (unsigned long long *ptr,
- unsigned long long value)
-{
- __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
-}
-#define ll_low(x) *(((unsigned int*)&(x))+0)
-#define ll_high(x) *(((unsigned int*)&(x))+1)
-
-static inline void __set_64bit_var (unsigned long long *ptr,
- unsigned long long value)
-{
- __set_64bit(ptr,ll_low(value), ll_high(value));
-}
-
-#define set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit_constant(ptr, value) : \
- __set_64bit_var(ptr, value) )
-
-#define _set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
- __set_64bit(ptr, ll_low(value), ll_high(value)) )
-
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has side effect, so that attribute volatile is necessary,
#define READONLY_MPT_VIRT_START (HYPERVISOR_VIRT_START)
#define READONLY_MPT_VIRT_END (READONLY_MPT_VIRT_START + (4*1024*1024))
/*
- * Next 16MB is fixed monitor space, which is part of a 48MB direct-mapped
+ * Next 16MB is fixed monitor space, which is part of a 44MB direct-mapped
* memory region. The following are machine addresses.
*/
#define MAX_MONITOR_ADDRESS (16*1024*1024)
#define MAX_DMA_ADDRESS (16*1024*1024)
-#define MAX_DIRECTMAP_ADDRESS (48*1024*1024)
+#define MAX_DIRECTMAP_ADDRESS (44*1024*1024)
/* And the virtual addresses for the direct-map region... */
#define DIRECTMAP_VIRT_START (READONLY_MPT_VIRT_END)
#define DIRECTMAP_VIRT_END (DIRECTMAP_VIRT_START + MAX_DIRECTMAP_ADDRESS)
#define RDWR_MPT_VIRT_END (RDWR_MPT_VIRT_START + (4*1024*1024))
#define FRAMETABLE_VIRT_START (RDWR_MPT_VIRT_END)
#define FRAMETABLE_VIRT_END (DIRECTMAP_VIRT_END)
+/* Next 4MB of virtual address space is used as a linear p.t. mapping. */
+#define LINEAR_PT_VIRT_START (DIRECTMAP_VIRT_END)
+#define LINEAR_PT_VIRT_END (LINEAR_PT_VIRT_START + (4*1024*1024))
/* Next 4MB of virtual address space used for per-domain mappings (eg. GDT). */
-#define PERDOMAIN_VIRT_START (DIRECTMAP_VIRT_END)
+#define PERDOMAIN_VIRT_START (LINEAR_PT_VIRT_END)
#define PERDOMAIN_VIRT_END (PERDOMAIN_VIRT_START + (4*1024*1024))
#define GDT_VIRT_START (PERDOMAIN_VIRT_START)
#define GDT_VIRT_END (GDT_VIRT_START + (64*1024))
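The arithmetic behind the layout change: the 48MB region that used to be entirely direct-mapped is now 44MB of direct map plus the new 4MB linear p.t. window, so PERDOMAIN_VIRT_START (and everything above it) keeps its old address. A self-contained compile-time check of that arithmetic, illustration only and not part of this changeset (the 22-bit shift assumes non-PAE i386, i.e. L2_PAGETABLE_SHIFT == 22):

/* Illustration only -- not part of this changeset. */
#define MB(n) ((n) * 1024UL * 1024UL)

/* Old: 4MB RO MPT + 48MB direct map;  new: 4MB + 44MB + 4MB linear p.t. */
typedef char perdomain_start_unchanged[
    (MB(4) + MB(44) + MB(4) == MB(4) + MB(48)) ? 1 : -1];

/* The linear p.t. window spans exactly one page-directory slot. */
typedef char linear_pt_is_one_slot[
    (MB(4) == (1UL << 22)) ? 1 : -1];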
asmlinkage void __enter_scheduler(void);
#define schedule() __schedule_not_callable_in_xen()
+extern void switch_to(struct task_struct *prev,
+ struct task_struct *next);
+
+
/* A compatibility hack for Linux drivers. */
#define MAX_SCHEDULE_TIMEOUT 0UL
static inline long schedule_timeout(long timeout)